From 80900a058bacc66f64a425d240b711f83d391529 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Wed, 5 Jul 2006 11:21:19 +0100 Subject: [PATCH] [HVM][VMX] Move vmcs and I/O bitmap allocation into vmx_initialise_guest_resources(). Signed-off-by: Xin B Li Signed-off-by: Keir Fraser --- xen/arch/x86/hvm/vmx/vmcs.c | 159 +++++++++++++++-------------- xen/arch/x86/hvm/vmx/vmx.c | 132 +++++++++++++++--------- xen/include/asm-x86/hvm/vmx/vmcs.h | 15 +-- 3 files changed, 173 insertions(+), 133 deletions(-) diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index b4568342d1..374aab179a 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -41,34 +41,52 @@ #include #endif -int vmcs_size; +static int vmcs_size; +static int vmcs_order; +static u32 vmcs_revision_id; -struct vmcs_struct *vmx_alloc_vmcs(void) +void vmx_init_vmcs_config(void) { - struct vmcs_struct *vmcs; u32 vmx_msr_low, vmx_msr_high; + if ( vmcs_size ) + return; + rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high); - vmcs_size = vmx_msr_high & 0x1fff; - vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); - memset((char *)vmcs, 0, vmcs_size); /* don't remove this */ - vmcs->vmcs_revision_id = vmx_msr_low; - return vmcs; + vmcs_revision_id = vmx_msr_low; + + vmcs_size = vmx_msr_high & 0x1fff; + vmcs_order = get_order_from_bytes(vmcs_size); } -static void free_vmcs(struct vmcs_struct *vmcs) +static struct vmcs_struct *vmx_alloc_vmcs(void) { - int order; + struct vmcs_struct *vmcs; - order = get_order_from_bytes(vmcs_size); - free_xenheap_pages(vmcs, order); + if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL ) + { + DPRINTK("Failed to allocate VMCS.\n"); + return NULL; + } + + memset(vmcs, 0, vmcs_size); /* don't remove this */ + vmcs->vmcs_revision_id = vmcs_revision_id; + + return vmcs; +} + +static void vmx_free_vmcs(struct vmcs_struct *vmcs) +{ + free_xenheap_pages(vmcs, vmcs_order); } static void __vmx_clear_vmcs(void *info) { 
struct vcpu *v = info; + __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs)); + v->arch.hvm_vmx.active_cpu = -1; v->arch.hvm_vmx.launched = 0; } @@ -128,11 +146,19 @@ void vmx_vmcs_exit(struct vcpu *v) vcpu_unpause(v); } +struct vmcs_struct *vmx_alloc_host_vmcs(void) +{ + return vmx_alloc_vmcs(); +} + +void vmx_free_host_vmcs(struct vmcs_struct *vmcs) +{ + vmx_free_vmcs(vmcs); +} + static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx) { int error = 0; - void *io_bitmap_a; - void *io_bitmap_b; error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, MONITOR_PIN_BASED_EXEC_CONTROLS); @@ -141,19 +167,8 @@ static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx) error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS); - /* need to use 0x1000 instead of PAGE_SIZE */ - io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); - io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); - memset(io_bitmap_a, 0xff, 0x1000); - /* don't bother debug port access */ - clear_bit(PC_DEBUG_PORT, io_bitmap_a); - memset(io_bitmap_b, 0xff, 0x1000); - - error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a)); - error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b)); - - arch_vmx->io_bitmap_a = io_bitmap_a; - arch_vmx->io_bitmap_b = io_bitmap_b; + error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a)); + error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b)); return error; } @@ -429,67 +444,52 @@ static inline int construct_vmcs_host(void) } /* - * Need to extend to support full virtualization. + * the working VMCS pointer has been set properly + * just before entering this function. 
*/ static int construct_vmcs(struct vcpu *v, cpu_user_regs_t *regs) { struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx; int error; - long rc; - - memset(arch_vmx, 0, sizeof(struct arch_vmx_struct)); - spin_lock_init(&arch_vmx->vmcs_lock); - - /* - * Create a new VMCS - */ - if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) { - printk("Failed to create a new VMCS\n"); - return -ENOMEM; - } - - __vmx_clear_vmcs(v); - vmx_load_vmcs(v); - - if ((error = construct_vmcs_controls(arch_vmx))) { - printk("construct_vmcs: construct_vmcs_controls failed\n"); - rc = -EINVAL; - goto err_out; + if ( (error = construct_vmcs_controls(arch_vmx)) ) { + printk("construct_vmcs: construct_vmcs_controls failed.\n"); + return error; } /* host selectors */ - if ((error = construct_vmcs_host())) { - printk("construct_vmcs: construct_vmcs_host failed\n"); - rc = -EINVAL; - goto err_out; + if ( (error = construct_vmcs_host()) ) { + printk("construct_vmcs: construct_vmcs_host failed.\n"); + return error; } /* guest selectors */ - if ((error = construct_init_vmcs_guest(regs))) { - printk("construct_vmcs: construct_vmcs_guest failed\n"); - rc = -EINVAL; - goto err_out; + if ( (error = construct_init_vmcs_guest(regs)) ) { + printk("construct_vmcs: construct_vmcs_guest failed.\n"); + return error; } - if ((error |= __vmwrite(EXCEPTION_BITMAP, - MONITOR_DEFAULT_EXCEPTION_BITMAP))) { - printk("construct_vmcs: setting Exception bitmap failed\n"); - rc = -EINVAL; - goto err_out; + if ( (error = __vmwrite(EXCEPTION_BITMAP, + MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) { + printk("construct_vmcs: setting exception bitmap failed.\n"); + return error; } - if (regs->eflags & EF_TF) - __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); + if ( regs->eflags & EF_TF ) + error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); else - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); + error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); - return 0; + return error; +} -err_out: - vmx_destroy_vmcs(v); - 
return rc; +int vmx_create_vmcs(struct vcpu *v) +{ + if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL ) + return -ENOMEM; + __vmx_clear_vmcs(v); + return 0; } void vmx_destroy_vmcs(struct vcpu *v) @@ -501,14 +501,14 @@ void vmx_destroy_vmcs(struct vcpu *v) vmx_clear_vmcs(v); - free_vmcs(arch_vmx->vmcs); - arch_vmx->vmcs = NULL; + free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER); + free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER); - free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000)); arch_vmx->io_bitmap_a = NULL; - - free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000)); arch_vmx->io_bitmap_b = NULL; + + vmx_free_vmcs(arch_vmx->vmcs); + arch_vmx->vmcs = NULL; } void vm_launch_fail(unsigned long eflags) @@ -547,19 +547,20 @@ void arch_vmx_do_resume(struct vcpu *v) void arch_vmx_do_launch(struct vcpu *v) { - int error; cpu_user_regs_t *regs = &current->arch.guest_context.user_regs; - error = construct_vmcs(v, regs); - if ( error < 0 ) + vmx_load_vmcs(v); + + if ( construct_vmcs(v, regs) < 0 ) { - if (v->vcpu_id == 0) { - printk("Failed to construct a new VMCS for BSP.\n"); + if ( v->vcpu_id == 0 ) { + printk("Failed to construct VMCS for BSP.\n"); } else { - printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id); + printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id); } domain_crash_synchronous(); } + vmx_do_launch(v); reset_stack_and_jump(vmx_asm_do_vmentry); } diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index df2e295acb..1f8dd198a7 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -54,34 +54,73 @@ static unsigned long trace_values[NR_CPUS][5]; static void vmx_ctxt_switch_from(struct vcpu *v); static void vmx_ctxt_switch_to(struct vcpu *v); -void vmx_final_setup_guest(struct vcpu *v) +static int vmx_initialize_guest_resources(struct vcpu *v) { + struct domain *d = v->domain; + struct vcpu *vc; + void *io_bitmap_a, *io_bitmap_b; + int rc; + 
v->arch.schedule_tail = arch_vmx_do_launch; v->arch.ctxt_switch_from = vmx_ctxt_switch_from; v->arch.ctxt_switch_to = vmx_ctxt_switch_to; - if ( v->vcpu_id == 0 ) - { - struct domain *d = v->domain; - struct vcpu *vc; + if ( v->vcpu_id != 0 ) + return 1; + for_each_vcpu ( d, vc ) + { /* Initialize monitor page table */ - for_each_vcpu(d, vc) - vc->arch.monitor_table = pagetable_null(); + vc->arch.monitor_table = pagetable_null(); - /* - * Required to do this once per domain - * XXX todo: add a seperate function to do these. - */ - memset(&d->shared_info->evtchn_mask[0], 0xff, - sizeof(d->shared_info->evtchn_mask)); - - /* Put the domain in shadow mode even though we're going to be using - * the shared 1:1 page table initially. It shouldn't hurt */ - shadow_mode_enable(d, - SHM_enable|SHM_refcounts| - SHM_translate|SHM_external|SHM_wr_pt_pte); + memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct)); + + if ( (rc = vmx_create_vmcs(vc)) != 0 ) + { + DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n", + vc->vcpu_id, rc); + return 0; + } + + spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock); + + if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL ) + { + DPRINTK("Failed to allocate io bitmap a for vcpu %d.\n", + vc->vcpu_id); + return 0; + } + + if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL ) + { + DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n", + vc->vcpu_id); + return 0; + } + + memset(io_bitmap_a, 0xff, 0x1000); + memset(io_bitmap_b, 0xff, 0x1000); + + /* don't bother debug port access */ + clear_bit(PC_DEBUG_PORT, io_bitmap_a); + + vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a; + vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b; } + + /* + * Required to do this once per domain XXX todo: add a separate function + * to do these. 
+ */ + memset(&d->shared_info->evtchn_mask[0], 0xff, + sizeof(d->shared_info->evtchn_mask)); + + /* Put the domain in shadow mode even though we're going to be using + * the shared 1:1 page table initially. It shouldn't hurt */ + shadow_mode_enable( + d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte); + + return 1; } static void vmx_relinquish_guest_resources(struct domain *d) @@ -90,9 +129,9 @@ static void vmx_relinquish_guest_resources(struct domain *d) for_each_vcpu ( d, v ) { + vmx_destroy_vmcs(v); if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) continue; - vmx_destroy_vmcs(v); free_monitor_pagetable(v); kill_timer(&v->arch.hvm_vmx.hlt_timer); if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) ) @@ -444,12 +483,6 @@ void stop_vmx(void) __vmxoff(); } -int vmx_initialize_guest_resources(struct vcpu *v) -{ - vmx_final_setup_guest(v); - return 1; -} - void vmx_migrate_timers(struct vcpu *v) { struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm); @@ -638,58 +671,61 @@ static int check_vmx_controls(u32 ctrls, u32 msr) int start_vmx(void) { - struct vmcs_struct *vmcs; - u32 ecx; u32 eax, edx; - u64 phys_vmcs; /* debugging */ + struct vmcs_struct *vmcs; /* * Xen does not fill x86_capability words except 0. 
*/ - ecx = cpuid_ecx(1); - boot_cpu_data.x86_capability[4] = ecx; + boot_cpu_data.x86_capability[4] = cpuid_ecx(1); if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability))) return 0; rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx); - if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) { - if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) { + if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK ) + { + if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 ) + { printk("VMX disabled by Feature Control MSR.\n"); return 0; } } - else { + else + { wrmsr(IA32_FEATURE_CONTROL_MSR, IA32_FEATURE_CONTROL_MSR_LOCK | IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0); } - if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, - MSR_IA32_VMX_PINBASED_CTLS_MSR)) + if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, + MSR_IA32_VMX_PINBASED_CTLS_MSR) ) return 0; - if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, - MSR_IA32_VMX_PROCBASED_CTLS_MSR)) + if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, + MSR_IA32_VMX_PROCBASED_CTLS_MSR) ) return 0; - if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, - MSR_IA32_VMX_EXIT_CTLS_MSR)) + if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, + MSR_IA32_VMX_EXIT_CTLS_MSR) ) return 0; - if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, - MSR_IA32_VMX_ENTRY_CTLS_MSR)) + if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, + MSR_IA32_VMX_ENTRY_CTLS_MSR) ) return 0; - set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */ + set_in_cr4(X86_CR4_VMXE); + + vmx_init_vmcs_config(); - if (!(vmcs = vmx_alloc_vmcs())) { - printk("Failed to allocate VMCS\n"); + if ( (vmcs = vmx_alloc_host_vmcs()) == NULL ) + { + printk("Failed to allocate host VMCS\n"); return 0; } - phys_vmcs = (u64) virt_to_maddr(vmcs); - - if (__vmxon(phys_vmcs)) { + if ( __vmxon(virt_to_maddr(vmcs)) ) + { printk("VMXON failed\n"); + vmx_free_host_vmcs(vmcs); return 0; } diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index dbc01c61c3..2b14b7c420 100644 
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -27,9 +27,7 @@ extern int start_vmx(void); extern void stop_vmx(void); extern void vmcs_dump_vcpu(void); -void vmx_final_setup_guest(struct vcpu *v); - -void vmx_enter_scheduler(void); +extern void vmx_init_vmcs_config(void); enum { VMX_CPU_STATE_PAE_ENABLED=0, @@ -46,8 +44,6 @@ struct vmcs_struct { unsigned char data [0]; /* vmcs size is read from MSR */ }; -extern int vmcs_size; - enum { VMX_INDEX_MSR_LSTAR = 0, VMX_INDEX_MSR_STAR, @@ -64,6 +60,10 @@ struct vmx_msr_state { unsigned long shadow_gs; }; +/* io bitmap is 4KBytes in size */ +#define IO_BITMAP_SIZE 0x1000 +#define IO_BITMAP_ORDER (get_order_from_bytes(IO_BITMAP_SIZE)) + struct arch_vmx_struct { /* Virtual address of VMCS. */ struct vmcs_struct *vmcs; @@ -101,7 +101,10 @@ struct arch_vmx_struct { void vmx_do_resume(struct vcpu *); -struct vmcs_struct *vmx_alloc_vmcs(void); +struct vmcs_struct *vmx_alloc_host_vmcs(void); +void vmx_free_host_vmcs(struct vmcs_struct *vmcs); + +int vmx_create_vmcs(struct vcpu *v); void vmx_destroy_vmcs(struct vcpu *v); void vmx_vmcs_enter(struct vcpu *v); void vmx_vmcs_exit(struct vcpu *v); -- 2.30.2